chama sri chandana 221710313010

In [ ]:
!wget --no-check-certificate \
    https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \
    -O /tmp/cats_and_dogs_filtered.zip
--2020-07-14 01:40:48--  https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip
Resolving storage.googleapis.com (storage.googleapis.com)... 173.194.216.128, 173.194.217.128, 172.217.204.128, ...
Connecting to storage.googleapis.com (storage.googleapis.com)|173.194.216.128|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 68606236 (65M) [application/zip]
Saving to: ‘/tmp/cats_and_dogs_filtered.zip’

/tmp/cats_and_dogs_ 100%[===================>]  65.43M  97.3MB/s    in 0.7s    

2020-07-14 01:40:49 (97.3 MB/s) - ‘/tmp/cats_and_dogs_filtered.zip’ saved [68606236/68606236]

In [ ]:
import os
import zipfile
local_zip='/tmp/cats_and_dogs_filtered.zip'
zip_ref=zipfile.ZipFile(local_zip,'r')
zip_ref.extractall('/tmp')
zip_ref.close()
In [ ]:
os.listdir('/tmp/cats_and_dogs_filtered')
Out[ ]:
['vectorize.py', 'validation', 'train']
In [ ]:
os.listdir('/tmp/cats_and_dogs_filtered/train')
Out[ ]:
['cats', 'dogs']
In [ ]:
print(len(os.listdir('/tmp/cats_and_dogs_filtered/train/dogs')))
print(len(os.listdir('/tmp/cats_and_dogs_filtered/train/cats')))
1000
1000
In [ ]:
base_dir='/tmp/cats_and_dogs_filtered'
train_dir=os.path.join(base_dir,'train')
validation_dir=os.path.join(base_dir,'validation')

train_dogs_dir=os.path.join(train_dir,'dogs')
train_cats_dir=os.path.join(train_dir,'cats')

val_dogs_dir=os.path.join(validation_dir,'dogs')
val_cats_dir=os.path.join(validation_dir,'cats')
In [ ]:
train_cats_filenames=os.listdir(train_cats_dir)
train_cats_filenames[:4]
Out[ ]:
['cat.817.jpg', 'cat.628.jpg', 'cat.634.jpg', 'cat.196.jpg']
In [ ]:
import matplotlib.pyplot as plt
%matplotlib inline
In [ ]:
plt.imshow(plt.imread(os.path.join(train_cats_dir,train_cats_filenames[0])))
Out[ ]:
<matplotlib.image.AxesImage at 0x7f357c180358>
In [ ]:
import random
In [ ]:
images=[]
plt.figure(figsize=(16,16))
for i in range(16):
  plt.subplot(4,4,i+1)
  imgname=random.choice(train_cats_filenames)
  images.append(imgname)
  img=plt.imread(os.path.join(train_cats_dir,imgname))
  plt.imshow(img)
  plt.title(img.shape)
In [ ]:
plt.figure(figsize=(16,16))
j=1
for i in images:
  img=plt.imread(os.path.join(train_cats_dir,i))
  plt.subplot(4,4,j)
  plt.hist(img.flat)
  j+=1
In [ ]:
train_dogs_filenames=os.listdir(train_dogs_dir)
train_dogs_filenames[:4]
Out[ ]:
['dog.103.jpg', 'dog.1.jpg', 'dog.984.jpg', 'dog.989.jpg']
In [ ]:
dogs_images=[]
plt.figure(figsize=(16,16))
for i in range(16):
  plt.subplot(4,4,i+1)
  imgname=random.choice(train_dogs_filenames)
  dogs_images.append(imgname)
  img=plt.imread(os.path.join(train_dogs_dir,imgname))
  plt.imshow(img)
  plt.title(img.shape)
In [ ]:
plt.figure(figsize=(16,16))
j=1
for i in dogs_images:
  img=plt.imread(os.path.join(train_dogs_dir,i))
  plt.subplot(4,4,j)
  plt.hist(img.flat)
  j+=1
In [ ]:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(train_dir,target_size=(150, 150),batch_size=20,class_mode='binary')

validation_generator = val_datagen.flow_from_directory(validation_dir,target_size=(150, 150),batch_size=20,class_mode='binary')
Found 2000 images belonging to 2 classes.
Found 1000 images belonging to 2 classes.
In [ ]:
img,labels=train_generator.next()
print(img.shape)
print(labels.shape)
(20, 150, 150, 3)
(20,)
In [ ]:
plt.figure(figsize=(16,16))
for i in range(20):
  plt.subplot(4,5,i+1)
  plt.imshow(img[i,:,:,:])
  plt.title(labels[i])
  plt.axis("off")
In [ ]:
## import required methods
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D,Dense,Flatten,MaxPooling2D
In [ ]:
model = Sequential()
## add a conv layer followed by maxpooling
model.add(Conv2D(16,3,activation='relu',input_shape=(150,150,3)))
model.add(MaxPooling2D(2))
## add a conv layer followed by maxpooling
model.add(Conv2D(32,3,activation='relu',input_shape=(150,150,3)))
model.add(MaxPooling2D(2))
## add a conv layer followed by maxpooling
model.add(Conv2D(64,3,activation='relu',input_shape=(150,150,3)))
model.add(MaxPooling2D(2))
# Convert the featuremap into 1D  array
model.add(Flatten())
# Fully connected layer with 512 neurons
model.add(Dense(512,activation='relu'))
## Final output layer
model.add(Dense(1,activation='sigmoid'))

#let us see the summary
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 148, 148, 16)      448       
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 74, 74, 16)        0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 72, 72, 32)        4640      
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 36, 36, 32)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 34, 34, 64)        18496     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 17, 17, 64)        0         
_________________________________________________________________
flatten (Flatten)            (None, 18496)             0         
_________________________________________________________________
dense (Dense)                (None, 512)               9470464   
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 513       
=================================================================
Total params: 9,494,561
Trainable params: 9,494,561
Non-trainable params: 0
_________________________________________________________________
In [ ]:
### Compiling the model
import tensorflow as tf

model.compile(loss=tf.keras.losses.BinaryCrossentropy(),metrics=['accuracy'])
In [ ]:
history=model.fit(train_generator,epochs=15,validation_data=validation_generator,batch_size=32)
Epoch 1/15
100/100 [==============================] - 57s 569ms/step - loss: 0.3710 - accuracy: 0.8290 - val_loss: 0.5711 - val_accuracy: 0.6970
Epoch 2/15
100/100 [==============================] - 60s 600ms/step - loss: 0.3016 - accuracy: 0.8750 - val_loss: 0.6887 - val_accuracy: 0.7030
Epoch 3/15
100/100 [==============================] - 58s 578ms/step - loss: 0.2175 - accuracy: 0.9110 - val_loss: 0.6950 - val_accuracy: 0.7200
Epoch 4/15
100/100 [==============================] - 59s 588ms/step - loss: 0.1517 - accuracy: 0.9420 - val_loss: 0.9767 - val_accuracy: 0.6990
Epoch 5/15
100/100 [==============================] - 62s 622ms/step - loss: 0.1233 - accuracy: 0.9585 - val_loss: 0.8746 - val_accuracy: 0.7340
Epoch 6/15
100/100 [==============================] - 59s 586ms/step - loss: 0.0797 - accuracy: 0.9710 - val_loss: 1.0391 - val_accuracy: 0.7100
Epoch 7/15
100/100 [==============================] - 61s 609ms/step - loss: 0.0693 - accuracy: 0.9840 - val_loss: 1.3404 - val_accuracy: 0.7260
Epoch 8/15
100/100 [==============================] - 63s 628ms/step - loss: 0.0544 - accuracy: 0.9845 - val_loss: 1.5159 - val_accuracy: 0.7030
Epoch 9/15
100/100 [==============================] - 58s 580ms/step - loss: 0.0315 - accuracy: 0.9905 - val_loss: 1.9395 - val_accuracy: 0.7030
Epoch 10/15
100/100 [==============================] - 57s 574ms/step - loss: 0.0495 - accuracy: 0.9850 - val_loss: 2.4403 - val_accuracy: 0.6770
Epoch 11/15
100/100 [==============================] - 57s 573ms/step - loss: 0.0658 - accuracy: 0.9805 - val_loss: 1.5163 - val_accuracy: 0.7180
Epoch 12/15
100/100 [==============================] - 58s 577ms/step - loss: 0.0777 - accuracy: 0.9860 - val_loss: 1.6687 - val_accuracy: 0.7190
Epoch 13/15
100/100 [==============================] - 58s 579ms/step - loss: 0.0294 - accuracy: 0.9910 - val_loss: 2.6014 - val_accuracy: 0.6640
Epoch 14/15
100/100 [==============================] - 57s 575ms/step - loss: 0.0166 - accuracy: 0.9925 - val_loss: 2.2197 - val_accuracy: 0.7160
Epoch 15/15
100/100 [==============================] - 57s 572ms/step - loss: 0.0271 - accuracy: 0.9915 - val_loss: 1.7499 - val_accuracy: 0.6940
In [ ]:
train_acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
train_loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = list(range(1,16))
plt.figure(figsize=(16,4))
plt.subplot(1,2,1)
plt.plot(epochs,train_acc,label='train_acc')
plt.plot(epochs,val_acc,label='val_acc')
plt.title('accuracy')
plt.legend()
plt.subplot(1,2,2)
plt.plot(epochs,train_loss,label='train_loss')
plt.plot(epochs,val_loss,label='val_loss')
plt.title('loss')
plt.legend()
Out[ ]:
<matplotlib.legend.Legend at 0x7f352d4a1a20>
In [ ]:
model1 = Sequential()
## add a conv layer followed by maxpooling
model1.add(Conv2D(128,3,activation='relu',input_shape=(150,150,3)))
model1.add(MaxPooling2D(2))
## add a conv layer followed by maxpooling
model1.add(Conv2D(64,3,activation='relu',input_shape=(150,150,3)))
model1.add(MaxPooling2D(2))
## add a conv layer followed by maxpooling
model1.add(Conv2D(64,3,activation='relu',input_shape=(150,150,3)))
model1.add(MaxPooling2D(2))
## add a conv layer followed by maxpooling
model1.add(Conv2D(32,3,activation='relu',input_shape=(150,150,3)))
model1.add(MaxPooling2D(2))
# Convert the featuremap into 1D  array
model1.add(Flatten())
# Fully connected layer with 512 neurons
model1.add(Dense(512,activation='relu'))
## Final output layer
model1.add(Dense(1,activation='softmax'))

#let us see the summary
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 148, 148, 16)      448       
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 74, 74, 16)        0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 72, 72, 32)        4640      
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 36, 36, 32)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 34, 34, 64)        18496     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 17, 17, 64)        0         
_________________________________________________________________
flatten (Flatten)            (None, 18496)             0         
_________________________________________________________________
dense (Dense)                (None, 512)               9470464   
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 513       
=================================================================
Total params: 9,494,561
Trainable params: 9,494,561
Non-trainable params: 0
_________________________________________________________________
In [ ]:
model1.compile(loss=tf.keras.losses.BinaryCrossentropy(),metrics=['accuracy'])
In [28]:
history1=model.fit(train_generator,epochs=7,validation_data=validation_generator,batch_size=32)
Epoch 1/7
100/100 [==============================] - 57s 573ms/step - loss: 0.0236 - accuracy: 0.9920 - val_loss: 2.2425 - val_accuracy: 0.7150
Epoch 2/7
100/100 [==============================] - 57s 569ms/step - loss: 0.0217 - accuracy: 0.9940 - val_loss: 2.6586 - val_accuracy: 0.7390
Epoch 3/7
100/100 [==============================] - 59s 586ms/step - loss: 0.1431 - accuracy: 0.9875 - val_loss: 2.5444 - val_accuracy: 0.7260
Epoch 4/7
100/100 [==============================] - 58s 580ms/step - loss: 0.0142 - accuracy: 0.9950 - val_loss: 2.7649 - val_accuracy: 0.7150
Epoch 5/7
100/100 [==============================] - 58s 579ms/step - loss: 0.0493 - accuracy: 0.9895 - val_loss: 2.1664 - val_accuracy: 0.7100
Epoch 6/7
100/100 [==============================] - 58s 578ms/step - loss: 0.0043 - accuracy: 0.9985 - val_loss: 3.6356 - val_accuracy: 0.7000
Epoch 7/7
100/100 [==============================] - 58s 577ms/step - loss: 0.0322 - accuracy: 0.9925 - val_loss: 2.7591 - val_accuracy: 0.7220
In [29]:
train_acc = history1.history['accuracy']
val_acc = history1.history['val_accuracy']
train_loss = history1.history['loss']
val_loss = history1.history['val_loss']
epochs = list(range(1,8))
plt.figure(figsize=(16,4))
plt.subplot(1,2,1)
plt.plot(epochs,train_acc,label='train_acc')
plt.plot(epochs,val_acc,label='val_acc')
plt.title('accuracy')
plt.legend()
plt.subplot(1,2,2)
plt.plot(epochs,train_loss,label='train_loss')
plt.plot(epochs,val_loss,label='val_loss')
plt.title('loss')
plt.legend()
Out[29]:
<matplotlib.legend.Legend at 0x7f352e659f60>
In [ ]: